author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 6 Jan 2006 17:14:29 +0000 (18:14 +0100)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Fri, 6 Jan 2006 17:14:29 +0000 (18:14 +0100)

Change the context-switch interface. Get rid of
context_switch_finalise(). Instead provide a back-call
context_switch_done() for situations where the arch-specific
context_switch() function does not return to the caller,
or needs to do some parts of state restoration with
interrupts enabled.

Get rid of ugly hack in arch/ia64.

Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/ia64/xen/process.c
xen/arch/ia64/xen/xenmisc.c
xen/arch/x86/domain.c
xen/common/schedule.c
xen/include/xen/sched-if.h
xen/include/xen/sched.h

index ba0f7b4aab50732d8e8d85f0df072a5395a0cfc0..e31e2fa38a79645e2ca1ecd45dcf082c1c47a56f 100644 (file)
@@ -71,12 +71,10 @@ void schedule_tail(struct vcpu *next)
        //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
        //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
 
-    // TG: Real HACK FIXME.
-    // This is currently necessary because when a new domain is started, 
-    // the context_switch function of xen/common/schedule.c(__enter_scheduler)
-    // never returns.  Therefore, the lock must be released.
-    // schedule_tail is only called when a domain is started.
-    spin_unlock_irq(&schedule_data[current->processor].schedule_lock);
+    // This is necessary because when a new domain is started, our
+    // implementation of context_switch() does not return (switch_to() has
+    // special and peculiar behaviour in this case).
+    context_switch_done();
 
        /* rr7 will be postponed to last point when resuming back to guest */
     if(VMX_DOMAIN(current)){
index 058f72fdea9c22b1d0fd6de1950abc1b2b3cffc6..c3605f3dee2b470b457f3b6078410ab3ddc05205 100644 (file)
@@ -329,11 +329,6 @@ if (!i--) { printk("+",id); i = 1000000; }
     }
 }
 
-void context_switch_finalise(struct vcpu *next)
-{
-       /* nothing to do */
-}
-
 void continue_running(struct vcpu *same)
 {
        /* nothing to do */
index e2c75e350dd8049c12b588b159051d6180a0d40e..0dcf94366ecafc1a310981442a490ad46bd0dccb 100644 (file)
@@ -46,7 +46,6 @@ boolean_param("noreboot", opt_noreboot);
 
 struct percpu_ctxt {
     struct vcpu *curr_vcpu;
-    unsigned int context_not_finalised;
     unsigned int dirty_segment_mask;
 } __cacheline_aligned;
 static struct percpu_ctxt percpu_ctxt[NR_CPUS];
@@ -758,21 +757,9 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
          !is_idle_domain(next->domain) )
     {
         __context_switch();
-        percpu_ctxt[cpu].context_not_finalised = 1;
-    }
-}
-
-void context_switch_finalise(struct vcpu *next)
-{
-    unsigned int cpu = smp_processor_id();
 
-    ASSERT(local_irq_is_enabled());
-
-    if ( percpu_ctxt[cpu].context_not_finalised )
-    {
-        percpu_ctxt[cpu].context_not_finalised = 0;
-
-        BUG_ON(percpu_ctxt[cpu].curr_vcpu != next);
+        context_switch_done();
+        ASSERT(local_irq_is_enabled());
 
         if ( VMX_DOMAIN(next) )
         {
@@ -785,6 +772,10 @@ void context_switch_finalise(struct vcpu *next)
             vmx_load_msrs(next);
         }
     }
+    else
+    {
+        context_switch_done();
+    }
 
     schedule_tail(next);
     BUG();
index ee9c21f324ae2de5b3b8561def0d63bea74f1e4c..9e7b088542c54007c569aa9a8a3ab9afcd7464b1 100644 (file)
@@ -474,11 +474,18 @@ static void __enter_scheduler(void)
              prev->domain->domain_id, prev->vcpu_id,
              next->domain->domain_id, next->vcpu_id);
 
+    schedule_data[cpu].context_switch_in_progress = 1;
     context_switch(prev, next);
+    if ( schedule_data[cpu].context_switch_in_progress )
+        context_switch_done();
+}
 
+void context_switch_done(void)
+{
+    unsigned int cpu = smp_processor_id();
+    ASSERT(schedule_data[cpu].context_switch_in_progress);
     spin_unlock_irq(&schedule_data[cpu].schedule_lock);
-
-    context_switch_finalise(next);
+    schedule_data[cpu].context_switch_in_progress = 0;
 }
 
 /* No locking needed -- pointer comparison is safe :-) */
index 26d7bada8a74a312fcbb8c2a6b8c6818c8187229..86ca33591c1b0a9f9beb67d871815c0bce50848c 100644 (file)
 
 struct schedule_data {
     spinlock_t          schedule_lock;  /* spinlock protecting curr        */
-    struct vcpu *curr;           /* current task                    */
-    struct vcpu *idle;           /* idle task for this cpu          */
+    struct vcpu        *curr;           /* current task                    */
+    struct vcpu        *idle;           /* idle task for this cpu          */
     void               *sched_priv;
     struct ac_timer     s_timer;        /* scheduling timer                */
     unsigned long       tick;           /* current periodic 'tick'         */
+    int                 context_switch_in_progress;
 #ifdef BUCKETS
     u32                 hist[BUCKETS];  /* for scheduler latency histogram */
 #endif
index 7728a2c590e3c1b3ef9f020a41950fa4efaccd36..298eb9506ab46d72af3accfc183371cdb8059376 100644 (file)
@@ -287,13 +287,17 @@ extern void context_switch(
     struct vcpu *next);
 
 /*
- * On some architectures (notably x86) it is not possible to entirely load
- * @next's context with interrupts disabled. These may implement a function to
- * finalise loading the new context after interrupts are re-enabled. This
- * function is not given @prev and is not permitted to access it.
+ * If context_switch() does not return to the caller, or you need to perform
+ * some aspects of state restoration with interrupts enabled, then you must
+ * call context_switch_done() at a suitable safe point.
+ * 
+ * As when returning from context_switch(), the caller must ensure that the
+ * local CPU is no longer running in the previous VCPU's context, and that the
+ * context is saved to memory. Alternatively, if implementing lazy context
+ * switching, ensure that invoking sync_vcpu_execstate() will switch and
+ * commit the previous VCPU's state.
  */
-extern void context_switch_finalise(
-    struct vcpu *next);
+extern void context_switch_done(void);
 
 /* Called by the scheduler to continue running the current VCPU. */
 extern void continue_running(